Live migration shadow table support for writable page tables.
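
When the writable-pagetable (ptwr) code reattaches a disconnected or writable L1 page while the domain is running in shadow mode, propagate the updated entries into the corresponding shadow L1 (if one exists) and refresh the shadow linear page table entry for the faulting va. Also: run the (normally #if 0'd) SHM_test shadow_mode_enable() call after new_thread() and under the shadow lock, let ptwr page faults fall through to the shadow fault handler where it can help, replace a BUG() with domain_crash(), and split the call-trace half of show_stack() out into show_trace() so it can be reused for debugging.
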
set_bit(DF_CONSTRUCTED, &p->flags);
+ new_thread(p, dsi.v_kernentry, vstack_end, vstartinfo_start);
+
#if 0 /* XXXXX DO NOT CHECK IN ENABLED !!! (but useful for testing so leave) */
- shadow_mode_enable(&p->mm, SHM_test);
+ shadow_lock(&p->mm);
+ shadow_mode_enable(p, SHM_test);
+ shadow_unlock(&p->mm);
#endif
- new_thread(p, dsi.v_kernentry, vstack_end, vstartinfo_start);
-
return 0;
}
MEM_LOG("ptwr: Could not update pte at %p\n", writable_pte);
domain_crash();
}
+
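+ /*
+  * If this domain is running in shadow mode, the reconnected L1 must be
+  * reflected in its shadow as well: copy each entry of the disconnected
+  * page into the shadow L1 (if one exists) and refresh the shadow
+  * linear mapping for disconnected_l1va.
+  */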
+ if ( unlikely(current->mm.shadow_mode) )
+ {
+ unsigned long spte;
+ unsigned long sstat =
+ get_shadow_status(&current->mm,
+ ptwr_info[cpu].disconnected_pte >> PAGE_SHIFT);
+
+ if ( sstat & PSH_shadowed )
+ {
+ int i;
+ unsigned long spfn = sstat & PSH_pfn_mask;
+ l1_pgentry_t *sl1e = map_domain_mem( spfn << PAGE_SHIFT );
+
+ for( i = 0; i < ENTRIES_PER_L1_PAGETABLE; i++ ) {
+ l1pte_no_fault( &current->mm,
+ &l1_pgentry_val(
+ ptwr_info[cpu].disconnected_page[i]),
+ &l1_pgentry_val(sl1e[i]) );
+ }
+ unmap_domain_mem( sl1e );
+ put_shadow_status(&current->mm);
+ }
+
+ l1pte_no_fault( &current->mm, &pte, &spte );
+ __put_user(spte, (unsigned long *)&shadow_linear_pg_table
+ [ptwr_info[cpu].disconnected_l1va>>PAGE_SHIFT] );
+ }
+
__flush_tlb_one(ptwr_info[cpu].disconnected_l1va);
PTWR_PRINTK(PP_A, ("[A] disconnected_l1va at %p now %08lx\n",
writable_pte, pte));
MEM_LOG("ptwr: Could not update pte at %p\n", writable_pte);
domain_crash();
}
+
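+ /*
+  * As above, but for the writable L1: propagate its entries into the
+  * shadow L1 (if shadowed) and update the shadow linear page table
+  * entry for writable_l1va.
+  */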
+ if ( unlikely(current->mm.shadow_mode) )
+ {
+ unsigned long spte;
+ unsigned long sstat =
+ get_shadow_status(&current->mm,
+ ptwr_info[cpu].writable_pte >> PAGE_SHIFT);
+
+ if ( sstat & PSH_shadowed )
+ {
+ int i;
+ unsigned long spfn = sstat & PSH_pfn_mask;
+ l1_pgentry_t *sl1e = map_domain_mem( spfn << PAGE_SHIFT );
+
+ for( i = 0; i < ENTRIES_PER_L1_PAGETABLE; i++ ) {
+ l1pte_no_fault( &current->mm,
+ &l1_pgentry_val(
+ ptwr_info[cpu].writable_page[i]),
+ &l1_pgentry_val(sl1e[i]) );
+ }
+ unmap_domain_mem( sl1e );
+ put_shadow_status(&current->mm);
+ }
+
+ l1pte_no_fault( &current->mm, &pte, &spte );
+ __put_user(spte, (unsigned long *)&shadow_linear_pg_table
+ [ptwr_info[cpu].writable_l1va>>PAGE_SHIFT] );
+ }
+
__flush_tlb_one(ptwr_info[cpu].writable_l1va);
PTWR_PRINTK(PP_I, ("[I] disconnected_l1va at %p now %08lx\n",
writable_pte, pte));
(__get_user(pte, (unsigned long *)
&linear_pg_table[addr >> PAGE_SHIFT]) == 0) )
{
+ if( (pte & _PAGE_RW) && (pte & _PAGE_PRESENT) )
+ return 0; /* we can't help. Maybe shadow mode can? */
+
pfn = pte >> PAGE_SHIFT;
#if 0
PTWR_PRINTK(PP_ALL, ("check pte %08lx = pfn %08lx for va %08lx\n", pte,
&linear_pg_table[addr>>PAGE_SHIFT]);
domain_crash();
}
- return 1;
+
+ if( unlikely(current->mm.shadow_mode) )
+ return 0; /* fall through to shadow mode to propagate */
+ else
+ return 1;
}
}
return 0;
// write back updated gpte
// XXX watch out for read-only L2 entries! (not used in Linux)
if ( unlikely( __put_user( gpte, (unsigned long*)&linear_pg_table[va>>PAGE_SHIFT])) )
- BUG(); // fixme!
+ domain_crash(); // fixme!
if ( unlikely( __put_user( spte, (unsigned long*)&shadow_linear_pg_table[va>>PAGE_SHIFT])) )
{
// failed:
// the L1 may not be shadowed, or the L2 entry may be insufficient
- unsigned long gpde, spde, gl1pfn, sl1pfn;
+ unsigned long gpde, spde, gl1pfn, sl1pfn, sl1ss;
SH_VVLOG("3: not shadowed or l2 insufficient gpte=%08lx spte=%08lx",gpte,spte );
gl1pfn = gpde>>PAGE_SHIFT;
-
- if ( ! (sl1pfn=__shadow_status(&current->mm, gl1pfn) ) )
+ sl1ss = __shadow_status(&current->mm, gl1pfn);
+ if ( ! (sl1ss & PSH_shadowed) )
{
// this L1 is NOT already shadowed so we need to shadow it
struct pfn_info *sl1pfn_info;
SH_VVLOG("4b: was shadowed, l2 missing ( %08lx )",sl1pfn);
+ sl1pfn = sl1ss & PSH_pfn_mask;
l2pde_general( m, &gpde, &spde, sl1pfn );
linear_l2_table[va>>L2_PAGETABLE_SHIFT] = mk_l2_pgentry(gpde);
}
-void show_stack(unsigned long *esp)
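+/*
+ * Print a best-effort call trace: scan the stack page upwards from
+ * 'esp' and report every word that looks like a kernel text address.
+ */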
+void show_trace(unsigned long *esp)
{
unsigned long *stack, addr;
int i;
+ printk("Call Trace from ESP=%p: ", esp);
+ stack = esp;
+ i = 0;
+ while (((long) stack & (STACK_SIZE-1)) != 0) {
+ addr = *stack++;
+ if (kernel_text_address(addr)) {
+ if (i && ((i % 6) == 0))
+ printk("\n ");
+ printk("[<%08lx>] ", addr);
+ i++;
+ }
+ }
+ printk("\n");
+}
+
+void show_stack(unsigned long *esp)
+{
+ unsigned long *stack;
+ int i;
+
printk("Stack trace from ESP=%p:\n", esp);
stack = esp;
}
printk("\n");
- printk("Call Trace from ESP=%p: ", esp);
- stack = esp;
- i = 0;
- while (((long) stack & (STACK_SIZE-1)) != 0) {
- addr = *stack++;
- if (kernel_text_address(addr)) {
- if (i && ((i % 6) == 0))
- printk("\n ");
- printk("[<%08lx>] ", addr);
- i++;
- }
- }
- printk("\n");
+ show_trace( esp );
}
void show_registers(struct pt_regs *regs)
}
else
{
- extern void show_traceX(void);
SH_LOG("mark_dirty OOR! mfn=%x pfn=%x max=%x (mm %p)",
mfn, pfn, m->shadow_dirty_bitmap_size, m );
- SH_LOG("dom=%u caf=%08x taf=%08x\n",
- frame_table[mfn].u.inuse.domain->domain,
+ SH_LOG("dom=%p caf=%08x taf=%08x\n",
+ frame_table[mfn].u.inuse.domain,
frame_table[mfn].count_info,
frame_table[mfn].u.inuse.type_info );
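+ /* Dump a call trace so the offending caller can be identified. */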
+ {
+ extern void show_trace(unsigned long *esp);
+ unsigned long *esp;
+ __asm__ __volatile__ ("movl %%esp,%0" : "=r" (esp) : );
+ show_trace(esp);
+ }
}
return rc;
static inline int mark_dirty( struct mm_struct *m, unsigned int mfn )
{
int rc;
- ASSERT(local_irq_is_enabled());
+ //ASSERT(local_irq_is_enabled());
//if(spin_is_locked(&m->shadow_lock)) printk("+");
shadow_lock(m);
rc = __mark_dirty( m, mfn );
independently.
*/
- ASSERT(local_irq_is_enabled());
+ //ASSERT(local_irq_is_enabled());
//if(spin_is_locked(&m->shadow_lock)) printk("*");
shadow_lock(m);
if ( unlikely(mm->shadow_mode) )
{
- ASSERT(local_irq_is_enabled());
+ //ASSERT(local_irq_is_enabled());
shadow_lock(mm);
__shadow_mk_pagetable(mm);
shadow_unlock(mm);